#from tools import init
import os
import tools
import glob
import cv2
import numpy as np
import pickle
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scipy
from scipy import signal
from collections import deque
# Output directory for every image this step produces.
out_dir = 'output_images/step2/'
# Load camera calibration / perspective data used by the tools helpers.
tools.init()
# Undistort test images with visualization
%matplotlib inline
# Undistort every test image, save the result, and show original vs.
# undistorted side by side.  (Loop-body indentation restored: the flattened
# export was a syntax error.)
images = glob.glob('test_images/*.jpg')
gs = gridspec.GridSpec(8, 2)
gs.update(wspace=0.01, hspace=0.02)  # set the spacing between axes.
plt.figure(figsize=(8, 2))
for idx, fname in enumerate(images):
    img = cv2.imread(fname)  # note: cv2.imread returns BGR
    dst = tools.undistort_img(img)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    ax1.set_title('Original Image', fontsize=30)
    ax2.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))
    ax2.set_title('Undistorted Image', fontsize=30)
    image_name = os.path.split(fname)[1]
    write_name = out_dir + 'undistorted_' + image_name
    cv2.imwrite(write_name, dst)
    # cv2.imshow('undistorted', dst)
    # cv2.waitKey(500)
cv2.destroyAllWindows()
def gaussian_blur(img, kernel_size=3):
    """Smooth *img* with a square Gaussian kernel of side *kernel_size*."""
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(img, ksize, 0)
def binarize(img,
             s_thresh=(90, 255),
             l_thresh=(40, 255),
             sx_thresh=(20, 100), ksize_sx=3):
    """Threshold an RGB image into a lane-pixel binary mask.

    Combines an S-channel (saturation) threshold, an L-channel (lightness)
    threshold and a Sobel-x gradient threshold computed on the L channel.

    Params: img       - undistorted RGB image.
            s_thresh  - (min, max) inclusive range for the S channel.
            l_thresh  - (min, max) inclusive range for the L channel.
            sx_thresh - (min, max) inclusive range for the scaled Sobel-x gradient.
            ksize_sx  - Sobel aperture size (odd; 11 was also tried).
    Returns: (binary, color_binary) where binary is a 3-channel uint8 mask
             (0/255) and color_binary stacks (l_binary, sxbinary, s_binary).
    """
    # Convert to HLS color space and separate the L & S channels.
    # BUGFIX: np.float was removed in NumPy 1.24 — use np.float64.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    # Sobel x: derivative in x accentuates lines away from horizontal
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0, ksize=ksize_sx)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold color (saturation) channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Threshold lightness
    l_binary = np.zeros_like(l_channel)
    l_binary[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1
    # Lane pixels: bright AND saturated, OR a strong x-gradient.
    binary = np.zeros_like(l_binary)
    binary[(l_binary == 1) & (s_binary == 1) | (sxbinary == 1)] = 1
    # Optional morphology (disabled): OPEN removes white blobs,
    # CLOSE fills black holes.
    # kernel = np.ones((3, 3), binary.dtype)
    # binary = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)
    # binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
    # Stack the three individual thresholds into a debug color image.
    color_binary = np.dstack((l_binary, sxbinary, s_binary))
    binary = (np.dstack((binary, binary, binary)) * 255.).astype('uint8')
    return binary, color_binary
def binarize_img(img):
    """Return only the 3-channel binary mask from binarize()."""
    return binarize(img)[0]
# Binarization demo on a single test image: save the debug color stack and
# show it next to the undistorted original.
image = mpimg.imread('test_images/test5.jpg')
image = tools.undistort_img(image)
_,color_binary = binarize(image)
assert(color_binary is not None)
plt.imsave(out_dir + 'binary_test5.jpg', color_binary)
# Plot the result
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=40)
ax2.imshow(color_binary)
ax2.set_title('Binarized Image', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Binarize every test image and save side-by-side comparisons.
# (Loop-body indentation restored: the flattened export was a syntax error.)
images = glob.glob('test_images/*.jpg')
gs = gridspec.GridSpec(8, 2)
gs.update(wspace=0.01, hspace=0.02)  # set the spacing between axes.
plt.figure(figsize=(8, 2))
for idx, fname in enumerate(images):
    image = mpimg.imread(fname)
    image = tools.undistort_img(image)
    # image = gaussian_blur(image)
    binary = binarize_img(image)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    ax1.imshow(image)
    ax1.set_title('Original Image', fontsize=30)
    ax2.imshow(binary)
    ax2.set_title('Binarized Image', fontsize=30)
    image_name = os.path.split(fname)[1]
    write_name = out_dir + 'binary_' + image_name
    plt.imsave(write_name, binary)
cv2.destroyAllWindows()
# Visualize the perspective ("bird's eye") transform: draw the source
# quadrilateral on an undistorted straight-lane image, then warp it.
# (Loop-body indentation restored: the flattened export was a syntax error.)
bird_corners = tools.birdview_corners()
image = mpimg.imread('test_images/straight_lines1.jpg')
image = tools.undistort_img(image)
corner_tuples = []
for i, _ in enumerate(bird_corners):
    corner_tuples.append(tuple(bird_corners[i]))
# draw: bottom-left, top-left, top-right, bottom-right
for i, j in [(0, 1), (1, 2), (2, 3), (3, 0)]:
    cv2.line(image, corner_tuples[i], corner_tuples[j], color=[255, 0, 0], thickness=1)
warped = tools.warp_img(image)
plt.imsave(out_dir + 'straight_lines.jpg', image)
plt.imsave(out_dir + 'warped_straight_lines.jpg', warped)
# Plot the result
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(25, 10))
f.tight_layout()
ax1.set_title('Undistorted Image with source points drawn', fontsize=35)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.imshow(image)
ax2.set_title('Warped result with dest. points drawn', fontsize=35)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.imshow(warped)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Warp a binarized test image to bird's-eye view and show the ROI masks.
image = mpimg.imread('test_images/test5.jpg')
image = tools.undistort_img(image)
warp = tools.warp_img(image)
warp_roi = tools.ROI(warp)
binary = binarize_img(image)
binary = tools.warp_img(binary)
binary_roi = tools.ROI(binary)
# Plot the result
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(25, 10))
f.tight_layout()
ax1.set_title('Undistorted Image', fontsize=35)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.imshow(image)
ax2.set_title('Warped result with ROI', fontsize=35)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.imshow(warp_roi)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(25, 10))
f.tight_layout()
ax1.set_title('Warped Binary result', fontsize=35)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.imshow(binary)
# BUGFIX: plot title read 'RIO' instead of 'ROI'
ax2.set_title('Warped binary result with ROI', fontsize=35)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.imshow(binary_roi)
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tools import binarize_pipeline
# Test find_peaks: locate the left and right lane-line base x positions
# near the given seed columns of a warped binary image.
# (BUGFIX: find_peaks was imported twice on consecutive lines.)
from detect_lane import find_peaks
img = mpimg.imread('test_images/test5.jpg')
binary = binarize_pipeline(img)
left_peak = find_peaks(binary, 300, verbose=True)
right_peak = find_peaks(binary, 1000, verbose=True)
def lane_fit_poly2(lanex, laney):
    """Fit x = a*y**2 + b*y + c through the given lane pixels.

    Params: lanex, laney - pixel coordinates of one lane line.
    Returns: np.array of polynomial coefficients [a, b, c].
    """
    return np.polyfit(laney, lanex, 2)
def detect_line(binary, x_base, margin=100, verbose=False):
    """Find one lane line with a sliding-window search along Y.

    Params: binary  - binary warped image (2-D, nonzero pixels = candidates).
            x_base  - expected window center at the bottom of the image.
            margin  - half-width of each search window.
            verbose - when True, also collect the window rectangles.
    Returns: ((lanex, laney), lane_fit, win_rects) where lanex/laney are the
             pixel coordinates of the line, lane_fit holds the 2nd-order
             polyfit coefficients (empty array when no pixels were found),
             and win_rects lists the window rectangles (empty unless verbose).
    """
    # Visualization of window rectangles
    win_rects = []
    # Choose the number of sliding windows
    nwindows = 9
    # Set height of windows.
    # BUGFIX: np.int was removed in NumPy 1.24; builtin int() is equivalent.
    window_height = int(binary.shape[0] / nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window center, updated for each window
    x_current = x_base
    # Minimum number of pixels found to recenter the next window
    minpix = 50
    # Pixel indices that belong to the line
    lane_inds = []
    # Step through the windows one by one, from image bottom to top
    for window in range(nwindows):
        # Window boundaries in x and y
        win_y_low = binary.shape[0] - (window + 1) * window_height
        win_y_high = binary.shape[0] - window * window_height
        win_x_low = x_current - margin
        win_x_high = x_current + margin
        if verbose:
            win_rects.append(((win_x_low, win_y_low), (win_x_high, win_y_high)))
        # Nonzero pixels inside the current window
        good_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                     (nonzerox >= win_x_low) & (nonzerox < win_x_high)).nonzero()[0]
        lane_inds.append(good_inds)
        # If enough pixels were found, recenter the next window on their mean
        if len(good_inds) > minpix:
            x_current = int(np.mean(nonzerox[good_inds]))
    # Concatenate the per-window index arrays
    lane_inds = np.concatenate(lane_inds)
    # Extract line pixel positions
    lanex = nonzerox[lane_inds]
    laney = nonzeroy[lane_inds]
    # Fit a second order polynomial (empty fit when no pixels were found)
    lane_fit = np.empty(shape=(0, 0))
    if len(laney):
        lane_fit = np.polyfit(laney, lanex, 2)
    return (lanex, laney), lane_fit, win_rects
#
# Draws detected lanes to an image.
# Params: binary - binary warped image (1 channel).
#         left_linex/left_liney - x/y points of left lane in binary image.
#         left_fit/right_fit - polynomial coefficients of each lane fit.
#         right_linex/right_liney - x/y points of right lane in binary image.
#         left_winds - windows for left lane.
#         right_winds - windows for right lane.  (doc fix: was listed as
#         "left_winds" twice in the original header)
# Returns: RGB image with windows, lane pixels and fitted lanes drawn.
#
def draw_lanes_with_windows(binary,
                            left_linex, left_liney, left_fit,
                            right_linex, right_liney, right_fit,
                            left_winds, right_winds):
    # (Body indentation restored: the flattened export was a syntax error.)
    out_img = np.dstack((binary, binary, binary)) * 255
    left_fitx = []
    right_fitx = []
    # Generate y values for plotting (one per image row)
    ploty = np.linspace(0, out_img.shape[0] - 1, out_img.shape[0])
    if len(left_linex):
        left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    if len(right_linex):
        right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]
    # Draw green window rectangles
    for wl, wr in zip(left_winds, right_winds):
        cv2.rectangle(out_img, wl[0], wl[1], (0, 255, 0), 2)
        cv2.rectangle(out_img, wr[0], wr[1], (0, 255, 0), 2)
    # Color the detected lane pixels (left red, right blue)
    if len(left_linex):
        out_img[left_liney, left_linex] = [255, 0, 0]
    if len(right_linex):
        out_img[right_liney, right_linex] = [0, 0, 255]
    # Cast the fitted curves into cv2.fillPoly() format: a thin (+/- 2 px)
    # closed polygon around each fitted line.
    left_pts = None
    right_pts = None
    if len(left_linex):
        left_fitx_pts1 = np.array([np.transpose(np.vstack([left_fitx - 2, ploty]))])
        left_fitx_pts2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + 2, ploty])))])
        left_pts = np.hstack((left_fitx_pts1, left_fitx_pts2))
    if len(right_linex):
        right_fitx_pts1 = np.array([np.transpose(np.vstack([right_fitx - 2, ploty]))])
        right_fitx_pts2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + 2, ploty])))])
        right_pts = np.hstack((right_fitx_pts1, right_fitx_pts2))
    # Draw yellow fitted lanes into the output image
    if len(left_linex) or len(right_linex):
        window_img = np.zeros_like(out_img)
        if len(left_linex):
            cv2.fillPoly(window_img, np.int_([left_pts]), (255, 255, 0))
        if len(right_linex):
            cv2.fillPoly(window_img, np.int_([right_pts]), (255, 255, 0))
        out_img = cv2.addWeighted(out_img, 1, window_img, 1., 0)
    return out_img
# Detect left and right lines
%matplotlib inline
print('left_peak:', left_peak)
print('right_peak:', right_peak)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary, binary, binary))*255
#left_peak = 0
#right_peak = 0
leftx = np.empty(shape=(0,0))
lefty = np.empty(shape=(0,0))
left_fit = np.empty(shape=(0,0))
left_win_rects = np.empty(shape=(0,0))
rightx = np.empty(shape=(0,0))
righty = np.empty(shape=(0,0))
right_fit = np.empty(shape=(0,0))
right_win_rects = np.empty(shape=(0,0))
if left_peak > 0:
(leftx, lefty), left_fit, left_win_rects = detect_line(binary, left_peak, verbose=True)
if right_peak > 0:
(rightx, righty), right_fit, right_win_rects = detect_line(binary, right_peak, verbose=True)
print('left_fit: ', left_fit)
print('right_fit: ', right_fit)
out_img = draw_lanes_with_windows(binary,
leftx, lefty, left_fit,
rightx, righty, right_fit,
left_win_rects, right_win_rects)
plt.figure(figsize=(8,8))
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.imshow(out_img)
# Line detection in ROI: with a polynomial fit from a previous frame,
# search only within +/- margin of that curve — much faster than a fresh
# sliding-window search.
def detect_line_in_roi(binary, line_fit, margin=100):
    """Re-detect one lane line around a previous polynomial fit.

    Params: binary   - binary warped image.
            line_fit - previous fit coefficients [a, b, c]; may be empty.
            margin   - half-width of the search corridor around the curve.
    Returns: ((linex, liney), line_fit) with a fresh 2nd-order fit, or an
             empty fit array when no pixels were found.
    """
    # (If/else indentation restored: the flattened export was a syntax error.)
    nonzero = binary.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    line_lane_inds = []
    if len(line_fit):
        # Boolean mask of nonzero pixels inside the corridor
        fitx = line_fit[0] * (nonzeroy**2) + line_fit[1] * nonzeroy + line_fit[2]
        line_lane_inds = (nonzerox > (fitx - margin)) & (nonzerox < (fitx + margin))
    # Extract line pixel positions
    linex = nonzerox[line_lane_inds]
    liney = nonzeroy[line_lane_inds]
    # Fit a second order polynomial
    if len(linex) and len(liney):
        line_fit = np.polyfit(liney, linex, 2)
    else:
        line_fit = np.empty(shape=(0, 0))
    return (linex, liney), line_fit
# Assume you now have a new warped binary image from the next frame of video
# (also called "binary_warped"): visualize the ROI-based detection.
def draw_detect_line_in_roi(binary_warped,
                            left_fit, leftx, lefty,
                            right_fit, rightx, righty,
                            margin=100):
    """Visualize ROI-based line detection.

    Params: binary_warped - binary warped image (1 channel).
            left_fit/right_fit - polynomial coefficients of each line.
            leftx, lefty / rightx, righty - detected pixel coordinates.
            margin - half-width of the search corridor to shade.
    Returns: RGB image with lane pixels, corridors and fitted lanes drawn.
    """
    # (Body indentation restored: the flattened export was a syntax error.)
    # Create an image to draw on and an image for the selection window
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    window_img = np.zeros_like(out_img)
    # Color in left (red) and right (blue) line pixels
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    left_fitx = []
    right_fitx = []
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    if len(leftx):
        left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    if len(rightx):
        right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]
    # Generate polygons that illustrate the search corridor, in the point
    # format cv2.fillPoly() expects
    left_line_pts = []
    right_line_pts = []
    if len(leftx):
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin, ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
    if len(rightx):
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin, ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Shade the corridor onto the blank image (green, blended at 30%)
    if len(left_line_pts):
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
    if len(right_line_pts):
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
    out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    # Draw thin (+/- 2 px) yellow polygons around the fitted curves
    left_pts = []
    right_pts = []
    if len(left_fitx):
        left_fitx_pts1 = np.array([np.transpose(np.vstack([left_fitx - 2, ploty]))])
        left_fitx_pts2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + 2, ploty])))])
        left_pts = np.hstack((left_fitx_pts1, left_fitx_pts2))
    if len(right_fitx):
        right_fitx_pts1 = np.array([np.transpose(np.vstack([right_fitx - 2, ploty]))])
        right_fitx_pts2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + 2, ploty])))])
        right_pts = np.hstack((right_fitx_pts1, right_fitx_pts2))
    # Draw yellow lanes in output image
    window_img = np.zeros_like(out_img)
    if len(left_pts):
        cv2.fillPoly(window_img, np.int_([left_pts]), (255, 255, 0))
    if len(right_pts):
        cv2.fillPoly(window_img, np.int_([right_pts]), (255, 255, 0))
    out_img = cv2.addWeighted(out_img, 1, window_img, 1., 0)
    return out_img
# Re-detect both lines in the ROI around the previous fits and visualize.
# BUGFIX: the right-line variables were never initialized, so a missing
# left_fit raised NameError at the draw call; the right-line detection was
# also guarded by left_fit instead of right_fit.
leftx1 = []
lefty1 = []
left_fit1 = []
rightx1 = []
righty1 = []
right_fit1 = []
if len(left_fit):
    (leftx1, lefty1), left_fit1 = detect_line_in_roi(binary, left_fit)
if len(right_fit):
    (rightx1, righty1), right_fit1 = detect_line_in_roi(binary, right_fit)
out_img = draw_detect_line_in_roi(binary, left_fit1, leftx1, lefty1, right_fit1, rightx1, righty1)
plt.figure(figsize=(8, 8))
plt.imshow(out_img)
plt.xlim(0, out_img.shape[1])
plt.ylim(out_img.shape[0], 0)
# Define a class to receive the characteristics of each line detection
class Line():
    def __init__(self, line_pos, img_width, n=5):
        # size of queue to store data for the last n detected frames
        self.n = n
        # number of fitted lines in buffer
        self.n_buffered = 0
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_xfitted = deque([], maxlen=n)
        # fit-coeffs of the last n fits of the line
        self.recent_fit_coeffs = deque([], maxlen=n)
        # average x values of the fitted line over the last n iterations
        self.bestx = None
        # polynomial coefficients averaged over the last n iterations
        self.avg_fit = None
        # radius of curvature of the line in some units
        self.radius_of_curvature = None
        # distance in meters of vehicle center from the line
        self.line_base_pos = None
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0, 0, 0], dtype='float')
        # x values for detected line pixels
        self.allx = None
        # y values for detected line pixels
        self.ally = None
        # position in pixels of fitted line at the bottom of image
        self.line_pos = line_pos
        # polynomial coefficients of the most recent fit
        self.current_fit = [np.array([False])]
        # x values of the most recent fit
        self.current_fit_x_vals = [np.array([False])]
        # y values for line fit: constant y-grid spanning 0..720
        self.fit_y_vals = np.linspace(0, 100, num=101) * 7.2
        # center of image along x is used as base pos.
        self.basepos = img_width / 2

    def set_current_fit_x_vals(self):
        # Evaluate the current polynomial on the fixed y-grid.
        yvals = self.fit_y_vals
        self.current_fit_x_vals = (self.current_fit_coeffs[0] * yvals**2
                                   + self.current_fit_coeffs[1] * yvals
                                   + self.current_fit_coeffs[2])

    def set_current_fit_coeffs(self):
        # Fit x = f(y) through the detected pixels.
        self.current_fit_coeffs = np.polyfit(self.ally, self.allx, 2)

    def set_line_base_pos(self):
        # Line position at the bottom of the image, converted to meters
        # relative to the image center (3.7 meters is ~600 pixels along x).
        y_eval = max(self.fit_y_vals)
        self.line_pos = (self.current_fit_coeffs[0] * y_eval**2
                         + self.current_fit_coeffs[1] * y_eval
                         + self.current_fit_coeffs[2])
        self.line_base_pos = (self.line_pos - self.basepos) * 3.7 / 600.0

    def calc_diffs(self):
        # Deviation of the newest fit from the buffered average.
        if self.n_buffered > 0:
            self.diffs = self.current_fit_coeffs - self.avg_fit
        else:
            self.diffs = np.array([0, 0, 0], dtype='float')

    def push_data(self):
        # BUGFIX: original referenced self.current_fit_xvals, an attribute
        # that does not exist (it is current_fit_x_vals) -> AttributeError.
        self.recent_xfitted.appendleft(self.current_fit_x_vals)
        self.recent_fit_coeffs.appendleft(self.current_fit_coeffs)
        assert(len(self.recent_xfitted) == len(self.recent_fit_coeffs))
        self.n_buffered = len(self.recent_xfitted)

    def pop_data(self):
        if self.n_buffered > 0:
            self.recent_xfitted.pop()
            self.recent_fit_coeffs.pop()
            assert(len(self.recent_xfitted) == len(self.recent_fit_coeffs))
            self.n_buffered = len(self.recent_xfitted)

    def update(self, binary, verbose=False):
        if self.detected:
            # BUGFIX: original passed self.current_fit as the binary image
            # (detect_line_in_roi(self.current_fit, self.current_fit)).
            (self.allx, self.ally), self.current_fit = detect_line_in_roi(binary, self.current_fit)
        else:
            self.allx = np.empty(shape=(0, 0))
            self.ally = np.empty(shape=(0, 0))
            self.current_fit = np.empty(shape=(0, 0))
            peak = find_peaks(binary, self.line_pos, verbose=verbose)
            win_rects = []
            if peak:
                (self.allx, self.ally), self.current_fit, win_rects = detect_line(binary, self.line_pos, verbose=verbose)
                # NOTE(review): the flattened export lost the nesting of the
                # following calls; they are placed under `if peak:` because
                # np.polyfit on the empty allx/ally arrays would raise.
                # Confirm against the original notebook.
                self.set_current_fit_coeffs()
                self.set_current_fit_x_vals()
                self.set_line_base_pos()
                self.calc_diffs()
def process_image(img, leftL, rightL, verbose=True):
    """Run the lane pipeline on one frame: binarize, then update both lines.

    Params: img   - input RGB frame.
            leftL, rightL - Line instances tracking the left/right lines.
            verbose - forwarded to Line.update.
    Returns: the input frame unchanged.
    """
    binary = tools.binarize_pipeline(img)
    # Refresh left then right line state from the new binary frame.
    for line in (leftL, rightL):
        line.update(binary, verbose)
    return img
import numpy as np
import cv2
import matplotlib.pyplot as plt
# Smoke-test the Line tracking pipeline on a single thresholded frame.
img = mpimg.imread('test_images/test5.jpg')
img_width = img.shape[1]
# Seed base x positions (pixels) for the left and right lines.
left_line = Line(327, img_width, 5)
right_line = Line(1018, img_width, 5)
# Process the same frame twice to exercise the update path a second time.
result = process_image(img, left_line, right_line)
result = process_image(img, left_line, right_line)
plt.figure()
plt.imshow(result)
plt.show()
plt.imsave(out_dir + 'projected_lane_test5.jpg', result)